import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import keras
from keras.layers import *
from keras.models import Model, Sequential
from keras.applications import inception_v3
from keras.preprocessing.image import ImageDataGenerator, load_img
# Paths: expects Train/ and Test/ directories in the working directory,
# each containing one sub-folder per class.
wrk_dir = os.getcwd()
train_dir = os.path.join(wrk_dir, "Train")
test_dir = os.path.join(wrk_dir, "Test")

# Augmentation belongs on the training data only. The test generator must
# (a) only rescale — shear/zoom/flip at evaluation time distorts the images
# being scored — and (b) use shuffle=False, so predict() output order matches
# the on-disk file order used later to build the ground-truth labels.
datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                             zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_set = datagen.flow_from_directory(train_dir, target_size=(299, 299),
                                        batch_size=32, class_mode="categorical")
test_set = test_datagen.flow_from_directory(test_dir, target_size=(299, 299),
                                            batch_size=32, class_mode="categorical",
                                            shuffle=False)
# Transfer learning: InceptionV3 (ImageNet weights) as a frozen feature
# extractor, with a fresh softmax classification head trained on top.
base_model = inception_v3.InceptionV3(include_top=False, input_shape=(299, 299, 3))

# Freeze the pretrained convolutional base so only the new head is trained.
for layer in base_model.layers:
    layer.trainable = False

x = Flatten()(base_model.output)
# Use the generator's class count rather than len(os.listdir(train_dir)),
# which would also count any stray non-directory entries (e.g. .DS_Store).
predictions = Dense(train_set.num_classes, activation="softmax")(x)

model = Model(base_model.input, predictions)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(train_set, validation_data=test_set,
          steps_per_epoch=len(train_set), validation_steps=len(test_set),
          epochs=3)
# Predict class probabilities for every test image, then reduce each row of
# probabilities to its arg-max: the predicted class index.
test_pred = model.predict(test_set)
test_ind = test_pred.argmax(axis=1)
test_ind
# Map class index -> human-readable label for plot titles.
label_encod = {0: "NO", 1: "Yes"}
# Display every test image with its predicted label as the plot title.
# Files are visited in nested os.listdir order; the running counter pairs
# each displayed file with the matching row of the prediction array.
k = 0
for class_folder in os.listdir(test_dir):
    folder_path = os.path.join(test_dir, class_folder)
    for file_name in os.listdir(folder_path):
        image = load_img(os.path.join(folder_path, file_name), target_size=(299, 299))
        plt.imshow(image)
        plt.title(label_encod[test_ind[k]])
        plt.show()
        k += 1
# Build the ground-truth label vector from the test directory layout.
# NOTE: the original version tested a cumulative *file* counter (k == 0),
# so only the very first file ever received class 0 and every other file —
# including the rest of the first class's folder — was labelled 1. The
# label must come from each image's class folder instead. Folders are
# enumerated in sorted order to match flow_from_directory's alphabetical
# class-index assignment.
true_pred = []
for class_idx, class_name in enumerate(sorted(os.listdir(test_dir))):
    class_dir = os.path.join(test_dir, class_name)
    true_pred.extend([class_idx] * len(os.listdir(class_dir)))
true_pred = np.array(true_pred)
true_pred
from sklearn.metrics import accuracy_score
# accuracy_score signature is (y_true, y_pred); accuracy itself is
# symmetric, but keep the conventional order for readability.
accuracy_score(true_pred, test_ind)
# Baseline CNN trained from scratch: three Conv -> BatchNorm -> ReLU ->
# MaxPool stages, then a dropout-regularised dense head.
model = Sequential()
# Input shape must match the generators' target_size of (299, 299); the
# original (224, 224, 3) would raise a shape mismatch as soon as fit()
# received its first batch.
model.add(Conv2D(filters=32, kernel_size=(5, 5), input_shape=(299, 299, 3), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(5, 5), padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
# init= is the removed Keras 1 API; Keras 2 uses kernel_initializer=.
model.add(Dense(128, kernel_initializer='uniform', activation='relu'))
model.add(Dropout(0.4))
# Class count from the generator, not os.listdir (which counts non-dirs too).
model.add(Dense(train_set.num_classes, activation='softmax'))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(train_set, validation_data=test_set,
          steps_per_epoch=len(train_set), validation_steps=len(test_set),
          epochs=3)
# Score the test set with the from-scratch CNN, then display every test
# image titled with its predicted label; the running counter aligns each
# file (nested os.listdir order) with its row in the prediction array.
test_pred = model.predict(test_set)
test_ind = test_pred.argmax(axis=1)
k = 0
for class_folder in os.listdir(test_dir):
    folder_path = os.path.join(test_dir, class_folder)
    for file_name in os.listdir(folder_path):
        image = load_img(os.path.join(folder_path, file_name), target_size=(299, 299))
        plt.imshow(image)
        plt.title(label_encod[test_ind[k]])
        plt.show()
        k += 1
# Ground-truth labels for the CNN evaluation. Same fix as the first
# evaluation: the original `if k == 0` tested a cumulative file counter,
# labelling only the very first file as class 0; label by each image's
# class folder instead, enumerating folders in sorted order to match
# flow_from_directory's alphabetical class indexing.
true_pred = []
for class_idx, class_name in enumerate(sorted(os.listdir(test_dir))):
    class_dir = os.path.join(test_dir, class_name)
    true_pred.extend([class_idx] * len(os.listdir(class_dir)))
true_pred = np.array(true_pred)
from sklearn.metrics import accuracy_score
# Conventional (y_true, y_pred) argument order.
accuracy_score(true_pred, test_ind)